struct thread_struct *next = &next_p->thread;
physdev_op_t op;
multicall_entry_t _mcl[8], *mcl = _mcl;
- mmu_update_t _mmu[2], *mmu = _mmu;
-
- if ( mm_state_sync & STATE_SYNC_PT )
- {
- mmu->ptr = virt_to_machine(cur_pgd) | MMU_EXTENDED_COMMAND;
- mmu->val = MMUEXT_NEW_BASEPTR;
- mmu++;
- }
-
- if ( mm_state_sync & STATE_SYNC_LDT )
- {
- __asm__ __volatile__ (
- "xorl %%eax,%%eax; movl %%eax,%%fs; movl %%eax,%%gs" : : : "eax" );
- mmu->ptr = (unsigned long)next_p->mm->context.ldt |
- MMU_EXTENDED_COMMAND;
- mmu->val = (next_p->mm->context.size << MMUEXT_CMD_SHIFT) |
- MMUEXT_SET_LDT;
- mmu++;
- }
-
- if ( mm_state_sync != 0 )
- {
- mcl->op = __HYPERVISOR_mmu_update;
- mcl->args[0] = (unsigned long)_mmu;
- mcl->args[1] = mmu - _mmu;
- mcl->args[2] = 0;
- mcl++;
- mm_state_sync = 0;
- }
/*
* This is basically 'unlazy_fpu', except that we queue a multicall to
#endif
extern pgd_t *cur_pgd;
-extern int mm_state_sync;
-#define STATE_SYNC_PT 1
-#define STATE_SYNC_LDT 2
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
clear_bit(cpu, &prev->cpu_vm_mask);
/* Re-load page tables */
cur_pgd = next->pgd;
- mm_state_sync |= STATE_SYNC_PT;
+ xen_pt_switch(__pa(cur_pgd)); /* immediate hypercall: hand Xen the new base page table (was deferred via mm_state_sync) */
/* load_LDT, if either the previous or next thread
 * has a non-default LDT.
 */
if (next->context.size+prev->context.size)
- mm_state_sync |= STATE_SYNC_LDT;
+ load_LDT(&next->context); /* reload the LDT now rather than batching it for later — NOTE(review): fires if either mm has a non-default LDT, including when only prev did */
}
}
-#define activate_mm(prev, next) \
-do { \
- switch_mm((prev),(next),NULL,smp_processor_id()); \
- if (mm_state_sync & STATE_SYNC_PT) \
- xen_pt_switch(__pa(cur_pgd)); \
- if (mm_state_sync & STATE_SYNC_LDT) \
- load_LDT(&(next)->context); \
- mm_state_sync = 0; \
-} while ( 0 )
+/* switch_mm now performs the Xen page-table switch and LDT reload
+ * synchronously itself, so activate_mm reduces to a bare switch_mm
+ * call and the deferred mm_state_sync flag handling is gone. */
+#define activate_mm(prev, next) \
+ switch_mm((prev),(next),NULL,smp_processor_id())
#endif